[IA64] Allow multiple-time mmap of the privcmd device
authorawilliam@xenbuild.aw <awilliam@xenbuild.aw>
Tue, 16 May 2006 15:05:36 +0000 (09:05 -0600)
committerawilliam@xenbuild.aw <awilliam@xenbuild.aw>
Tue, 16 May 2006 15:05:36 +0000 (09:05 -0600)
Allow multiple-time mmap of the privcmd device. The old implementation
doesn't allow mmap to be called more than once on the same struct file.
However, xend and qemu do mmap the device multiple times.
This patch affects only the dom0 VP model. With this patch, multiple domU
domains can boot simultaneously, and a VTI domain can boot on a VP-model dom0.

Signed-off-by: Isaku Yamahata <yamahata@valinux.co.jp>
linux-2.6-xen-sparse/arch/ia64/xen/hypervisor.c

index 8f642f3a34e142887cc643646fcfd5b210094a3b..586f37218feaf4f5102240c3dcb104b96973813d 100644 (file)
@@ -23,6 +23,7 @@
 //#include <linux/kernel.h>
 #include <linux/spinlock.h>
 #include <linux/bootmem.h>
+#include <linux/vmalloc.h>
 #include <asm/page.h>
 #include <asm/hypervisor.h>
 #include <asm/hypercall.h>
@@ -363,7 +364,6 @@ struct address_space xen_ia64_foreign_dummy_mapping;
 struct xen_ia64_privcmd_entry {
        atomic_t        map_count;
        struct page*    page;
-       unsigned long   mfn;
 };
 
 static void
@@ -371,9 +371,13 @@ xen_ia64_privcmd_init_entry(struct xen_ia64_privcmd_entry* entry)
 {
        atomic_set(&entry->map_count, 0);
        entry->page = NULL;
-       entry->mfn = INVALID_MFN;
 }
 
+//TODO Using alloc_page() to allocate pseudo-physical address space is a
+//     waste of memory.
+//     When a VTI domain is created, qemu maps all of the VTI domain's
+//     pages, which amounts to several hundred megabytes at least.
+//     Remove alloc_page().
 static int
 xen_ia64_privcmd_entry_mmap(struct vm_area_struct* vma,
                            unsigned long addr,
@@ -418,7 +422,6 @@ xen_ia64_privcmd_entry_mmap(struct vm_area_struct* vma,
        } else {
                atomic_inc(&entry->map_count);
                entry->page = page;
-               entry->mfn = mfn;
        }
 
 out:
@@ -443,7 +446,6 @@ xen_ia64_privcmd_entry_munmap(struct xen_ia64_privcmd_entry* entry)
        }
 
        entry->page = NULL;
-       entry->mfn = INVALID_MFN;
        __free_page(page);
 }
 
@@ -465,9 +467,8 @@ xen_ia64_privcmd_entry_close(struct xen_ia64_privcmd_entry* entry)
        }
 }
 
-struct xen_ia64_privcmd_file {
-       struct file*                    file;
-       atomic_t                        map_count;
+struct xen_ia64_privcmd_range {
+       atomic_t                        ref_count;
        unsigned long                   pgoff; // in PAGE_SIZE
 
        unsigned long                   num_entries;
@@ -475,7 +476,8 @@ struct xen_ia64_privcmd_file {
 };
 
 struct xen_ia64_privcmd_vma {
-       struct xen_ia64_privcmd_file*   file;
+       struct xen_ia64_privcmd_range*  range;
+
        unsigned long                   num_entries;
        struct xen_ia64_privcmd_entry*  entries;
 };
@@ -490,20 +492,19 @@ struct vm_operations_struct xen_ia64_privcmd_vm_ops = {
 
 static void
 __xen_ia64_privcmd_vma_open(struct vm_area_struct* vma,
-                           struct xen_ia64_privcmd_vma* privcmd_vma)
+                           struct xen_ia64_privcmd_vma* privcmd_vma,
+                           struct xen_ia64_privcmd_range* privcmd_range)
 {
-       struct xen_ia64_privcmd_file* privcmd_file =
-               (struct xen_ia64_privcmd_file*)vma->vm_file->private_data;
-       unsigned long entry_offset = vma->vm_pgoff - privcmd_file->pgoff;
+       unsigned long entry_offset = vma->vm_pgoff - privcmd_range->pgoff;
        unsigned long num_entries = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
        unsigned long i;
 
        BUG_ON(entry_offset < 0);
-       BUG_ON(entry_offset + num_entries > privcmd_file->num_entries);
+       BUG_ON(entry_offset + num_entries > privcmd_range->num_entries);
 
-       privcmd_vma->file = privcmd_file;
+       privcmd_vma->range = privcmd_range;
        privcmd_vma->num_entries = num_entries;
-       privcmd_vma->entries = &privcmd_file->entries[entry_offset];
+       privcmd_vma->entries = &privcmd_range->entries[entry_offset];
        vma->vm_private_data = privcmd_vma;
        for (i = 0; i < privcmd_vma->num_entries; i++) {
                xen_ia64_privcmd_entry_open(&privcmd_vma->entries[i]);
@@ -516,15 +517,14 @@ __xen_ia64_privcmd_vma_open(struct vm_area_struct* vma,
 static void
 xen_ia64_privcmd_vma_open(struct vm_area_struct* vma)
 {
-       struct xen_ia64_privcmd_file* privcmd_file =
-               (struct xen_ia64_privcmd_file*)vma->vm_file->private_data;
-       struct xen_ia64_privcmd_vma* privcmd_vma;
+       struct xen_ia64_privcmd_vma* privcmd_vma = (struct xen_ia64_privcmd_vma*)vma->vm_private_data;
+       struct xen_ia64_privcmd_range* privcmd_range = privcmd_vma->range;
 
-       atomic_inc(&privcmd_file->map_count);
+       atomic_inc(&privcmd_range->ref_count);
        // vm_op->open() can't fail.
        privcmd_vma = kmalloc(sizeof(*privcmd_vma), GFP_KERNEL | __GFP_NOFAIL);
 
-       __xen_ia64_privcmd_vma_open(vma, privcmd_vma);
+       __xen_ia64_privcmd_vma_open(vma, privcmd_vma, privcmd_range);
 }
 
 static void
@@ -532,7 +532,7 @@ xen_ia64_privcmd_vma_close(struct vm_area_struct* vma)
 {
        struct xen_ia64_privcmd_vma* privcmd_vma =
                (struct xen_ia64_privcmd_vma*)vma->vm_private_data;
-       struct xen_ia64_privcmd_file* privcmd_file = privcmd_vma->file;
+       struct xen_ia64_privcmd_range* privcmd_range = privcmd_vma->range;
        unsigned long i;
 
        for (i = 0; i < privcmd_vma->num_entries; i++) {
@@ -541,17 +541,16 @@ xen_ia64_privcmd_vma_close(struct vm_area_struct* vma)
        vma->vm_private_data = NULL;
        kfree(privcmd_vma);
 
-       if (atomic_dec_and_test(&privcmd_file->map_count)) {
+       if (atomic_dec_and_test(&privcmd_range->ref_count)) {
 #if 1
-               for (i = 0; i < privcmd_file->num_entries; i++) {
+               for (i = 0; i < privcmd_range->num_entries; i++) {
                        struct xen_ia64_privcmd_entry* entry =
-                               &privcmd_vma->entries[i];
+                               &privcmd_range->entries[i];
                        BUG_ON(atomic_read(&entry->map_count) != 0);
                        BUG_ON(entry->page != NULL);
                }
 #endif
-               privcmd_file->file->private_data = NULL;
-               kfree(privcmd_file->file->private_data);
+               vfree(privcmd_range);
        }
 }
 
@@ -559,22 +558,16 @@ int
 privcmd_mmap(struct file * file, struct vm_area_struct * vma)
 {
        unsigned long num_entries = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
-       struct xen_ia64_privcmd_file* privcmd_file;
+       struct xen_ia64_privcmd_range* privcmd_range;
        struct xen_ia64_privcmd_vma* privcmd_vma;
        unsigned long i;
        BUG_ON(!running_on_xen);
 
-        /* DONTCOPY is essential for Xen as copy_page_range is broken. */
-        vma->vm_flags |= VM_RESERVED | VM_IO | VM_DONTCOPY;
-
-       if (file->private_data != NULL) {
-               return -EBUSY;
-       }
-
-       privcmd_file = kmalloc(sizeof(*privcmd_file) +
-                              sizeof(privcmd_file->entries[0]) * num_entries,
-                              GFP_KERNEL);
-       if (privcmd_file == NULL) {
+       BUG_ON(file->private_data != NULL);
+       privcmd_range =
+               vmalloc(sizeof(*privcmd_range) +
+                       sizeof(privcmd_range->entries[0]) * num_entries);
+       if (privcmd_range == NULL) {
                goto out_enomem0;
        }
        privcmd_vma = kmalloc(sizeof(*privcmd_vma), GFP_KERNEL);
@@ -582,22 +575,23 @@ privcmd_mmap(struct file * file, struct vm_area_struct * vma)
                goto out_enomem1;
        }
 
-       atomic_set(&privcmd_file->map_count, 1);
-       privcmd_file->num_entries = num_entries;
-       for (i = 0; i < privcmd_file->num_entries; i++) {
-               xen_ia64_privcmd_init_entry(&privcmd_file->entries[i]);
+       /* DONTCOPY is essential for Xen as copy_page_range is broken. */
+       vma->vm_flags |= VM_RESERVED | VM_IO | VM_DONTCOPY | VM_PFNMAP;
+
+       atomic_set(&privcmd_range->ref_count, 1);
+       privcmd_range->pgoff = vma->vm_pgoff;
+       privcmd_range->num_entries = num_entries;
+       for (i = 0; i < privcmd_range->num_entries; i++) {
+               xen_ia64_privcmd_init_entry(&privcmd_range->entries[i]);
        }
-       file->private_data = privcmd_file;
-       privcmd_file->file = file;
-       privcmd_file->pgoff = vma->vm_pgoff;
 
-       __xen_ia64_privcmd_vma_open(vma, privcmd_vma);
+       __xen_ia64_privcmd_vma_open(vma, privcmd_vma, privcmd_range);
        return 0;
 
 out_enomem1:
        kfree(privcmd_vma);
 out_enomem0:
-       kfree(privcmd_file);
+       vfree(privcmd_range);
        return -ENOMEM;
 }
 
@@ -625,7 +619,7 @@ direct_remap_pfn_range(struct vm_area_struct *vma,
        i = (address - vma->vm_start) >> PAGE_SHIFT;
        for (offset = 0; offset < size; offset += PAGE_SIZE) {
                struct xen_ia64_privcmd_entry* entry =
-                       &privcmd_vma->file->entries[i];
+                       &privcmd_vma->entries[i];
                error = xen_ia64_privcmd_entry_mmap(vma, (address + offset) & PAGE_MASK, entry, mfn, prot, domid);
                if (error != 0) {
                        break;